const runtime.pageSize
75 uses in runtime (current package)
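pageSize is the runtime's logical heap page size: malloc.go#L118 aliases it to _PageSize, which is 1 << _PageShift (8 KiB). Nearly every use below is one of two conversions, pages to bytes or bytes to pages. A minimal sketch, using a stand-in constant rather than the runtime's own:

    package main

    import "fmt"

    // Stand-in for the runtime's constant (pageSize = _PageSize = 1 << 13).
    const pageSize = 1 << 13 // 8192 bytes

    func main() {
        // pages -> bytes: the npages*pageSize pattern throughout mheap.go.
        var npages uintptr = 4
        fmt.Println(npages * pageSize) // 32768

        // bytes -> page number: the p/pageSize pattern in mheap.go and mpagealloc.go.
        var p uintptr = 0x1c000
        fmt.Println(p / pageSize) // 14
    }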
arena.go#L196: userArenaChunkPages = userArenaChunkBytes / pageSize
arena.go#L206: if userArenaChunkPages*pageSize != userArenaChunkBytes {
arena.go#L857: if s.npages*pageSize != userArenaChunkBytes {
arena.go#L878: sysFault(unsafe.Pointer(s.base()), s.npages*pageSize)
arena.go#L883: gcController.heapInUse.add(-int64(s.npages * pageSize))
arena.go#L896: atomic.Xaddint64(&stats.committed, -int64(s.npages*pageSize))
arena.go#L897: atomic.Xaddint64(&stats.inHeap, -int64(s.npages*pageSize))
arena.go#L938: if s.npages*pageSize != userArenaChunkBytes {
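The arena.go uses all enforce that a user arena chunk is a whole number of pages: the chunk size is divided by pageSize once (#L196), and multiply-back checks (#L206, #L857, #L938) catch any truncation from integer division. A minimal sketch of that divide-then-check idiom, with an illustrative chunk size (the real userArenaChunkBytes is platform-dependent):

    package main

    import "fmt"

    const (
        pageSize            = 8192
        userArenaChunkBytes = 8 << 20 // illustrative; the real value varies by platform
        userArenaChunkPages = userArenaChunkBytes / pageSize
    )

    func main() {
        // Integer division truncates, so multiplying back exposes any remainder.
        if userArenaChunkPages*pageSize != userArenaChunkBytes {
            panic("user arena chunk size is not a multiple of the page size")
        }
        fmt.Println("pages per chunk:", userArenaChunkPages) // 1024
    }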
malloc.go#L118: pageSize = _PageSize
malloc.go#L258: pagesPerArena = heapArenaBytes / pageSize
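malloc.go#L258 fixes how many page slots each heap arena's metadata must cover. Assuming the common 64-bit arena size of 64 MiB (it is smaller on some targets):

    package main

    import "fmt"

    const (
        pageSize       = 8192
        heapArenaBytes = 64 << 20 // 64 MiB on most 64-bit platforms
    )

    func main() {
        // malloc.go#L258: pages tracked per arena.
        fmt.Println(heapArenaBytes / pageSize) // 8192 pages per arena
    }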
mbitmap.go#L579: return heapBitsSlice(span.base(), pageSize)
mbitmap.go#L581: return heapBitsSlice(span.base(), span.npages*pageSize)
mbitmap.go#L602: spanSize := span.npages * pageSize
mbitmap.go#L670: dst := unsafe.Pointer(span.base() + pageSize - pageSize/goarch.PtrSize/8)
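The expression at mbitmap.go#L670 places a pointer bitmap at the tail of the span's first page: one bit per pointer-sized word means a page needs pageSize/PtrSize/8 bytes of bitmap. The arithmetic, worked for a 64-bit target:

    package main

    import "fmt"

    const (
        pageSize = 8192
        ptrSize  = 8 // goarch.PtrSize on 64-bit targets
    )

    func main() {
        // pageSize/ptrSize words per page, 8 bits per bitmap byte:
        // the divisor chain from mbitmap.go#L670.
        fmt.Println(pageSize / ptrSize / 8) // 128 bytes of bitmap per page
    }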
mcache.go#L213: gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
mcache.go#L242: atomic.Xadd64(&stats.largeAlloc, int64(npages*pageSize))
mcache.go#L247: gcController.totalAlloc.Add(int64(npages * pageSize))
mcache.go#L250: gcController.update(int64(s.npages*pageSize), 0)
mgcscavenge.go#L130: maxPagesPerPhysPage = maxPhysPageSize / pageSize
mgcscavenge.go#L739: maxPages := max / pageSize
mgcscavenge.go#L740: if max%pageSize != 0 {
mgcscavenge.go#L749: minPages := physPageSize / pageSize
mgcscavenge.go#L763: addr := chunkBase(ci) + uintptr(base)*pageSize
mgcscavenge.go#L778: sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)
mgcscavenge.go#L782: nbytes := int64(npages * pageSize)
mgcscavenge.go#L805: return uintptr(npages) * pageSize
mgcscavenge.go#L962: if physHugePageSize > pageSize && physHugePageSize > physPageSize {
mgcscavenge.go#L969: pagesPerHugePage := physHugePageSize / pageSize
mgcscavenge.go#L1104: newSearchAddr := chunkBase(i) + pallocChunkBytes - pageSize
mgcscavenge.go#L1153: addr := chunkBase(ci) + uintptr(page+npages-1)*pageSize
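mgcscavenge.go#L739-740 converts a byte budget into pages, rounding up so a trailing partial page is still counted; #L749 converts the OS page size into a minimum run of runtime pages. A minimal sketch of the round-up step, with a hypothetical helper name (the runtime inlines this):

    package main

    import "fmt"

    const pageSize = 8192

    // ceilPages mirrors the divide-then-round-up step at mgcscavenge.go#L739-740;
    // the name is hypothetical.
    func ceilPages(max uintptr) uintptr {
        maxPages := max / pageSize
        if max%pageSize != 0 {
            maxPages++
        }
        return maxPages
    }

    func main() {
        fmt.Println(ceilPages(8192)) // 1
        fmt.Println(ceilPages(8193)) // 2: a partial page still counts as a whole one
    }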
mgcwork.go#L27: if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
mgcwork.go#L378: s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
mheap.go#L702: return ha.spans[(p/pageSize)%pagesPerArena]
mheap.go#L713: return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena]
mheap.go#L740: pageIdx = ((p / pageSize) / 8) % uintptr(len(arena.pageInUse))
mheap.go#L741: pageMask = byte(1 << ((p / pageSize) % 8))
mheap.go#L928: trace.GCSweepSpan((n0 - nFreed) * pageSize)
mheap.go#L1002: p := base / pageSize
mheap.go#L1008: ai = arenaIndex(base + n*pageSize)
mheap.go#L1050: arenaLimit := arenaBase + npage*pageSize
mheap.go#L1073: npage -= (arenaLimit - arenaBase) / pageSize
mheap.go#L1186: needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize
mheap.go#L1220: extraPages := physPageSize / pageSize
mheap.go#L1349: nbytes := npages * pageSize
mheap.go#L1397: nbytes := npages * pageSize
mheap.go#L1488: ask := alignUp(npage, pallocChunkPages) * pageSize
mheap.go#L1649: nbytes := s.npages * pageSize
mheap.go#L1714: span.limit = base + npages*pageSize // see go.dev/issue/74288; adjusted later for heap spans
mheap.go#L1851: arenaPage := (s.base() / pageSize) % pagesPerArena
mheap.go#L1859: arenaPage := (s.base() / pageSize) % pagesPerArena
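Within mheap.go, lines #L740-741 show how a page's in-use bit is addressed: the page number p/pageSize is split into a byte index (divide by 8) and a bit mask (shift by the remainder). A sketch with a hypothetical helper that reproduces the split:

    package main

    import "fmt"

    const pageSize = 8192

    // pageBit is a hypothetical stand-in for the pageIdx/pageMask computation
    // at mheap.go#L740-741: one bit per page, eight pages per bitmap byte.
    func pageBit(p uintptr, bitmapLen int) (idx uintptr, mask byte) {
        n := p / pageSize
        return (n / 8) % uintptr(bitmapLen), byte(1 << (n % 8))
    }

    func main() {
        bitmap := make([]byte, 4)
        idx, mask := pageBit(3*pageSize, len(bitmap))
        bitmap[idx] |= mask // mark page 3 as in use
        fmt.Printf("byte %d, mask %#04x\n", idx, mask) // byte 0, mask 0x08
    }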
mpagealloc.go#L59: pallocChunkBytes = pallocChunkPages * pageSize
mpagealloc.go#L118: return uint(p % pallocChunkBytes / pageSize)
mpagealloc.go#L428: p.update(base, size/pageSize, true, false)
mpagealloc.go#L489: limit := base + npages*pageSize - 1
mpagealloc.go#L573: limit := base + npages*pageSize - 1
mpagealloc.go#L602: return uintptr(scav) * pageSize
mpagealloc.go#L772: foundFree(levelIndexToOffAddr(l, i+j), (uintptr(1)<<logMaxPages)*pageSize)
mpagealloc.go#L810: addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
mpagealloc.go#L851: addr := chunkBase(ci) + uintptr(j)*pageSize
mpagealloc.go#L855: searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
mpagealloc.go#L894: addr = chunkBase(i) + uintptr(j)*pageSize
mpagealloc.go#L895: searchAddr = offAddr{chunkBase(i) + uintptr(searchIdx)*pageSize}
mpagealloc.go#L940: limit := base + npages*pageSize - 1
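Several mpagealloc.go uses (#L489, #L573, #L940) compute an inclusive limit: base + npages*pageSize - 1 is the address of the last byte in the run, not one past it. A worked example:

    package main

    import "fmt"

    const pageSize = 8192

    func main() {
        // Inclusive upper bound for a 3-page run starting at base,
        // as at mpagealloc.go#L489, #L573, and #L940.
        var base, npages uintptr = 0x10000, 3
        limit := base + npages*pageSize - 1
        fmt.Printf("[%#x, %#x]\n", base, limit) // [0x10000, 0x15fff]
    }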
mpagecache.go#L46: return c.base + i*pageSize, uintptr(scav) * pageSize
mpagecache.go#L66: return c.base + uintptr(i*pageSize), uintptr(scav) * pageSize
mpagecache.go#L138: base: chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
mpagecache.go#L155: base: alignDown(addr, 64*pageSize),
mpagecache.go#L181: p.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
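A page cache spans one 64-bit bitmap's worth of pages, so mpagecache.go#L138 and #L155 align cache bases to 64-page boundaries, and #L181 advances the search address to the last page of the cached window. The alignment step, matching the behavior of the runtime's alignDown helper (round down to a power-of-two multiple):

    package main

    import "fmt"

    const (
        pageSize       = 8192
        pageCachePages = 64 // one cache covers a 64-page window
    )

    // alignDown matches the runtime helper's behavior: round addr down to a
    // multiple of align, where align is a power of two.
    func alignDown(addr, align uintptr) uintptr {
        return addr &^ (align - 1)
    }

    func main() {
        addr := uintptr(0x12345678)
        fmt.Printf("%#x\n", alignDown(addr, pageCachePages*pageSize)) // 0x12300000
    }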
msize.go#L29: reqSize += pageSize - 1
msize.go#L33: return reqSize &^ (pageSize - 1)
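msize.go#L29 and #L33 round a large allocation request up to a whole number of pages with the classic add-then-mask trick, which relies on pageSize being a power of two:

    package main

    import "fmt"

    const pageSize = 8192

    // roundUpToPage reproduces the rounding at msize.go#L29 and #L33
    // (the function name here is illustrative).
    func roundUpToPage(reqSize uintptr) uintptr {
        reqSize += pageSize - 1          // overflow the low bits if not aligned
        return reqSize &^ (pageSize - 1) // then clear them
    }

    func main() {
        fmt.Println(roundUpToPage(1))    // 8192
        fmt.Println(roundUpToPage(8192)) // 8192
        fmt.Println(roundUpToPage(8193)) // 16384
    }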
traceallocfree.go#L39: w.varint(uint64(pageSize))
traceallocfree.go#L107: return traceArg(uint64(s.base())-trace.minPageHeapAddr) / pageSize
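Finally, the trace encoder records pageSize once up front (traceallocfree.go#L39) so that span bases can be emitted as compact page-granular offsets from the heap's minimum address (#L107) rather than raw 64-bit pointers. A sketch with made-up addresses for illustration:

    package main

    import "fmt"

    const pageSize = 8192

    func main() {
        // Page-granular offset as in traceallocfree.go#L107; both addresses
        // here are illustrative, not real heap values.
        var minPageHeapAddr uint64 = 0xc000000000
        var base uint64 = 0xc000040000
        fmt.Println((base - minPageHeapAddr) / pageSize) // 32
    }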